slice_pool2\unsync/
mod.rs

1//! Unsynchronized memory pools.
2
3pub use self::owned::{SliceBox, SlicePool, Sliceable};
4use std::cell::RefCell;
5use crate::Chunk;
6
7mod owned;
8
/// Relative position of a chunk's neighbor within a `ChunkChain`.
enum Order {
  /// The chunk directly before a given index.
  Preceding,
  /// The chunk directly after a given index.
  Following,
}
13
/// A non thread-safe chunk chain.
///
/// The chunks are kept sorted by `offset` (relied upon by the binary
/// search in `release`); each chunk's `free` flag marks whether it is
/// available for allocation.
struct ChunkChain(RefCell<Vec<Chunk>>);
16
17impl ChunkChain {
18  pub fn new(size: usize) -> Self {
19    ChunkChain(RefCell::new(vec![Chunk::new(size)]))
20  }
21
22  pub fn allocate(&self, size: usize) -> Option<Chunk> {
23    let mut chunks = self.0.borrow_mut();
24
25    // Find a chunk with the least amount of memory required
26    let (index, _) = chunks
27      .iter()
28      .enumerate()
29      .filter(|(_, chunk)| chunk.free && chunk.size >= size)
30      .min_by_key(|(_, chunk)| chunk.size)?;
31
32    // Determine whether there is any memory surplus
33    let delta = chunks[index].size - size;
34
35    if delta > 0 {
36      // Deduct the left over memory from the allocation
37      chunks[index].size -= delta;
38
39      if Self::has_free_adjacent(&chunks, index, Order::Preceding) {
40        // Increase the size of the preceding chunk
41        chunks[index - 1].size += delta;
42
43        // Shift the offset of the allocated chunk
44        chunks[index].offset += delta;
45      } else if Self::has_free_adjacent(&chunks, index, Order::Following) {
46        // Update the size and offset of the next chunk
47        chunks[index + 1].offset -= delta;
48        chunks[index + 1].size += delta;
49      } else {
50        // Insert a new chunk representing the surplus memory
51        let offset = chunks[index].offset + size;
52        chunks.insert(index + 1, Chunk::with_offset(delta, offset));
53        chunks[index].free = false;
54      }
55    } else {
56      // The allocation covers a single chunk
57      chunks[index].free = false;
58    }
59
60    Some(chunks[index])
61  }
62
63  pub fn release(&self, offset: usize) {
64    let mut chunks = self.0.borrow_mut();
65
66    let index = chunks
67      .binary_search_by_key(&offset, |chunk| chunk.offset)
68      .expect("releasing chunk");
69    let size = chunks[index].size;
70
71    if Self::has_free_adjacent(&chunks, index, Order::Preceding) {
72      // Increase the preceding chunk's size
73      chunks[index - 1].size += size;
74    } else if Self::has_free_adjacent(&chunks, index, Order::Following) {
75      // Increase the extent of the next chunk
76      chunks[index + 1].offset -= size;
77      chunks[index + 1].size += size;
78    } else {
79      // No free adjacent chunks, simply mark this one as free
80      chunks[index].free = true;
81      return;
82    }
83
84    chunks.remove(index);
85  }
86
87  fn has_free_adjacent(chunks: &[Chunk], index: usize, order: Order) -> bool {
88    match order {
89      Order::Preceding => index > 0 && chunks[index - 1].free,
90      Order::Following => index + 1 < chunks.len() && chunks[index + 1].free,
91    }
92  }
93}